library(dplyr)
##
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
library(fGarch)
## Loading required package: timeDate
## Loading required package: timeSeries
## Loading required package: fBasics
library(dynlm)
## Loading required package: zoo
##
## Attaching package: 'zoo'
## The following object is masked from 'package:timeSeries':
##
## time<-
## The following objects are masked from 'package:base':
##
## as.Date, as.Date.numeric
library(tseries)
## Registered S3 method overwritten by 'xts':
## method from
## as.zoo.xts zoo
## Registered S3 method overwritten by 'quantmod':
## method from
## as.zoo.data.frame zoo
library(astsa)
##
## Attaching package: 'astsa'
## The following object is masked from 'package:fBasics':
##
## nyse
library(xts)
##
## Attaching package: 'xts'
## The following objects are masked from 'package:dplyr':
##
## first, last
library(fpp)
## Loading required package: forecast
## Registered S3 methods overwritten by 'forecast':
## method from
## fitted.fracdiff fracdiff
## residuals.fracdiff fracdiff
##
## Attaching package: 'forecast'
## The following object is masked from 'package:astsa':
##
## gas
## Loading required package: fma
##
## Attaching package: 'fma'
## The following objects are masked from 'package:astsa':
##
## chicken, sales
## Loading required package: expsmooth
## Loading required package: lmtest
##
## Attaching package: 'fpp'
## The following object is masked from 'package:astsa':
##
## oil
library(TSA)
## Registered S3 methods overwritten by 'TSA':
## method from
## fitted.Arima forecast
## plot.Arima forecast
##
## Attaching package: 'TSA'
## The following objects are masked from 'package:timeDate':
##
## kurtosis, skewness
## The following objects are masked from 'package:stats':
##
## acf, arima
## The following object is masked from 'package:utils':
##
## tar
library(forecast)
library(dplR)
library(tseries)
# Load the IMF IFS export; keep strings as character (not factors)
ifs <- read.csv("IFS.csv", stringsAsFactors = FALSE)
# normalize the column names to a consistent snake-ish scheme
colnames(ifs) <- c("Country_Name", "Country_Code", "Indicator_Name",
                   "Indicator_Code", "Time_Period", "Value", "Status")
# remove Status column (not used in the analysis)
ifs <- ifs[, 1:6]
# take a look at the dataset
head(ifs)
## Country_Name Country_Code
## 1 United States 111
## 2 United States 111
## 3 United States 111
## 4 United States 111
## 5 United States 111
## 6 United States 111
## Indicator_Name
## 1 International Reserves, Official Reserve Assets, US Dollars
## 2 International Reserves, Official Reserve Assets, US Dollars
## 3 International Reserves, Official Reserve Assets, US Dollars
## 4 International Reserves, Official Reserve Assets, US Dollars
## 5 International Reserves, Official Reserve Assets, US Dollars
## 6 International Reserves, Official Reserve Assets, US Dollars
## Indicator_Code Time_Period Value
## 1 RAFA_USD 1980M5 21917557876
## 2 RAFA_USD 1980M9 22994402034
## 3 RAFA_USD 1980M11 25673025848
## 4 RAFA_USD 1981M4 29692779278
## 5 RAFA_USD 1981M9 29715202935
## 6 RAFA_USD 1982M4 31555856317
# dim(ifs)
print(colnames(ifs)) # confirm the cleaned column names before subsetting
## [1] "Country_Name" "Country_Code" "Indicator_Name" "Indicator_Code"
## [5] "Time_Period" "Value"
# United States ----
# Keep the US Industrial Production Index rows and build a proper Date column
df1_us <- subset(ifs, Indicator_Code == 'AIP_IX' & Country_Name == 'United States')
# "1980M5" -> "1980/5" -> "1980/5/01" -> Date
df1_us$Time_Period <- gsub("M", "/", df1_us$Time_Period)
df1_us['date'] <- '/01'
df1_us$Time_Period <- paste0(df1_us$Time_Period, df1_us$date)
df1_us$Time_Period <- as.Date(df1_us$Time_Period, "%Y/%m/%d")
# sort chronologically, drop the helper column, reset row names
df1_us <- df1_us[order(df1_us$Time_Period), ]
df1_us <- subset(df1_us, select = -c(date))
rownames(df1_us) <- NULL
# Research Indicator: Economic Activity, Industrial Production, Index
# Indicator Code: AIP_IX
# Country: USA
# the original data as a monthly time series
# NOTE(review): ts() assumes a regular monthly series starting 1980M1, but the
# raw IFS rows shown above skip months (1980M5, 1980M9, ...) — confirm the
# subset is actually complete/monthly before trusting the time axis
df11 <- ts(df1_us$Value, frequency = 12, start = c(1980, 1))
plot(df11, main="United States: Industrial Production Index", xlab='year', ylab='US IPI')
plot(decompose(df11))
# Raw periodogram (no log scale)
sp <- spectrum(df11, log='no')
# find the predominant frequency (was sp$fre — relied on $ partial matching)
sp$freq[which.max(sp$spec)] # 0.025
## [1] 0.025
# period of the predominant frequency (1/frequency)
1/sp$freq[which.max(sp$spec)] # 40 months
## [1] 40
max(sp$spec) # estimate of spectral density at the predominant frequency (~326.3907)
## [1] 326.3907
# 95% CI for the spectral density at the peak: 2*max(spec)/chi-square(2 df) quantiles
U <- qchisq(.025, 2)
L <- qchisq(.975, 2)
2*max(sp$spec)/L # lower bound for spectral density
## [1] 88.47962
2*max(sp$spec)/U # upper bound for spectral density
## [1] 12891.74
# The 95% confidence interval is [88.47962, 12891.74]
# Automatic seasonal ARIMA search for the US series (stepwise, approximations first)
fit1 <- auto.arima(df11, trace = TRUE) # Best model: ARIMA(4,0,1)(2,1,1)[12] with drift # AIC: 1022.693
##
## Fitting models using approximations to speed things up...
##
## ARIMA(2,0,2)(1,1,1)[12] with drift : 987.9367
## ARIMA(0,0,0)(0,1,0)[12] with drift : 2487.57
## ARIMA(1,0,0)(1,1,0)[12] with drift : 1102.752
## ARIMA(0,0,1)(0,1,1)[12] with drift : 1972.828
## ARIMA(0,0,0)(0,1,0)[12] : 2566.802
## ARIMA(2,0,2)(0,1,1)[12] with drift : 999.8981
## ARIMA(2,0,2)(1,1,0)[12] with drift : 1029.682
## ARIMA(2,0,2)(2,1,1)[12] with drift : 985.0017
## ARIMA(2,0,2)(2,1,0)[12] with drift : 989.1653
## ARIMA(2,0,2)(2,1,2)[12] with drift : 979.5924
## ARIMA(2,0,2)(1,1,2)[12] with drift : 980.8966
## ARIMA(1,0,2)(2,1,2)[12] with drift : 1022.589
## ARIMA(2,0,1)(2,1,2)[12] with drift : 1003.046
## ARIMA(3,0,2)(2,1,2)[12] with drift : 1041.516
## ARIMA(2,0,3)(2,1,2)[12] with drift : 978.6119
## ARIMA(2,0,3)(1,1,2)[12] with drift : 979.8452
## ARIMA(2,0,3)(2,1,1)[12] with drift : 983.9795
## ARIMA(2,0,3)(1,1,1)[12] with drift : 987.6149
## ARIMA(1,0,3)(2,1,2)[12] with drift : 999.6705
## ARIMA(3,0,3)(2,1,2)[12] with drift : 971.2854
## ARIMA(3,0,3)(1,1,2)[12] with drift : 973.9898
## ARIMA(3,0,3)(2,1,1)[12] with drift : 969.51
## ARIMA(3,0,3)(1,1,1)[12] with drift : 975.4823
## ARIMA(3,0,3)(2,1,0)[12] with drift : 979.8806
## ARIMA(3,0,3)(1,1,0)[12] with drift : 1096.151
## ARIMA(3,0,2)(2,1,1)[12] with drift : 1044.916
## ARIMA(4,0,3)(2,1,1)[12] with drift : 969.8308
## ARIMA(3,0,4)(2,1,1)[12] with drift : 971.0494
## ARIMA(2,0,4)(2,1,1)[12] with drift : 985.9654
## ARIMA(4,0,2)(2,1,1)[12] with drift : 967.7735
## ARIMA(4,0,2)(1,1,1)[12] with drift : 976.2705
## ARIMA(4,0,2)(2,1,0)[12] with drift : 976.1243
## ARIMA(4,0,2)(2,1,2)[12] with drift : 969.0887
## ARIMA(4,0,2)(1,1,0)[12] with drift : 1022.999
## ARIMA(4,0,2)(1,1,2)[12] with drift : 975.0806
## ARIMA(4,0,1)(2,1,1)[12] with drift : 966.197
## ARIMA(4,0,1)(1,1,1)[12] with drift : 974.9985
## ARIMA(4,0,1)(2,1,0)[12] with drift : 974.9076
## ARIMA(4,0,1)(2,1,2)[12] with drift : 967.5903
## ARIMA(4,0,1)(1,1,0)[12] with drift : 1021.926
## ARIMA(4,0,1)(1,1,2)[12] with drift : 973.5646
## ARIMA(3,0,1)(2,1,1)[12] with drift : 974.9792
## ARIMA(4,0,0)(2,1,1)[12] with drift : 974.2169
## ARIMA(5,0,1)(2,1,1)[12] with drift : 968.7653
## ARIMA(3,0,0)(2,1,1)[12] with drift : 1016.15
## ARIMA(5,0,0)(2,1,1)[12] with drift : 971.5388
## ARIMA(5,0,2)(2,1,1)[12] with drift : 970.6539
## ARIMA(4,0,1)(2,1,1)[12] : 970.5866
##
## Now re-fitting the best model(s) without approximations...
##
## ARIMA(4,0,1)(2,1,1)[12] with drift : 1022.693
##
## Best model: ARIMA(4,0,1)(2,1,1)[12] with drift
# Refit the chosen model with sarima to get residual diagnostic plots and a t-table
sarima(df11, 4,0,1,2,1,1,12) # according to the diagnostic plots, the model is a good fit
## initial value 1.283466
## iter 2 value 0.336696
## iter 3 value 0.234533
## iter 4 value 0.062828
## iter 5 value 0.004002
## iter 6 value -0.085041
## iter 7 value -0.129895
## iter 8 value -0.218406
## iter 9 value -0.285143
## iter 10 value -0.299936
## iter 11 value -0.302313
## iter 12 value -0.305571
## iter 13 value -0.320862
## iter 14 value -0.328682
## iter 15 value -0.340570
## iter 16 value -0.344303
## iter 17 value -0.348020
## iter 18 value -0.352979
## iter 19 value -0.361265
## iter 20 value -0.365733
## iter 21 value -0.365747
## iter 22 value -0.366222
## iter 23 value -0.366602
## iter 24 value -0.366917
## iter 25 value -0.367217
## iter 26 value -0.367664
## iter 27 value -0.368020
## iter 28 value -0.368310
## iter 29 value -0.368590
## iter 30 value -0.368807
## iter 31 value -0.368887
## iter 32 value -0.368905
## iter 33 value -0.368920
## iter 34 value -0.368947
## iter 35 value -0.368975
## iter 36 value -0.368989
## iter 37 value -0.368991
## iter 38 value -0.368991
## iter 39 value -0.368991
## iter 39 value -0.368991
## final value -0.368991
## converged
## initial value -0.342462
## iter 2 value -0.342870
## iter 3 value -0.343386
## iter 4 value -0.343408
## iter 5 value -0.343445
## iter 6 value -0.343489
## iter 7 value -0.343524
## iter 8 value -0.343534
## iter 9 value -0.343543
## iter 10 value -0.343559
## iter 11 value -0.343579
## iter 12 value -0.343592
## iter 13 value -0.343596
## iter 14 value -0.343598
## iter 15 value -0.343601
## iter 16 value -0.343603
## iter 17 value -0.343604
## iter 18 value -0.343604
## iter 19 value -0.343605
## iter 20 value -0.343605
## iter 21 value -0.343605
## iter 22 value -0.343605
## iter 23 value -0.343605
## iter 24 value -0.343606
## iter 25 value -0.343606
## iter 26 value -0.343606
## iter 27 value -0.343606
## iter 28 value -0.343606
## iter 29 value -0.343606
## iter 30 value -0.343606
## iter 30 value -0.343606
## iter 30 value -0.343606
## final value -0.343606
## converged
## $fit
##
## Call:
## stats::arima(x = xdata, order = c(p, d, q), seasonal = list(order = c(P, D,
## Q), period = S), xreg = constant, transform.pars = trans, fixed = fixed,
## optim.control = list(trace = trc, REPORT = 1, reltol = tol))
##
## Coefficients:
## ar1 ar2 ar3 ar4 ma1 sar1 sar2 sma1
## 1.3971 -0.2448 0.0066 -0.1750 -0.4334 -0.1474 -0.1339 -0.4812
## s.e. 0.1644 0.1835 0.0907 0.0701 0.1669 0.1127 0.0767 0.1086
## constant
## 0.1231
## s.e. 0.0390
##
## sigma^2 estimated as 0.4928: log likelihood = -501.1, aic = 1022.21
##
## $degrees_of_freedom
## [1] 457
##
## $ttable
## Estimate SE t.value p.value
## ar1 1.3971 0.1644 8.4990 0.0000
## ar2 -0.2448 0.1835 -1.3338 0.1829
## ar3 0.0066 0.0907 0.0726 0.9421
## ar4 -0.1750 0.0701 -2.4959 0.0129
## ma1 -0.4334 0.1669 -2.5967 0.0097
## sar1 -0.1474 0.1127 -1.3076 0.1917
## sar2 -0.1339 0.0767 -1.7467 0.0814
## sma1 -0.4812 0.1086 -4.4301 0.0000
## constant 0.1231 0.0390 3.1602 0.0017
##
## $AIC
## [1] 2.142997
##
## $AICc
## [1] 2.143805
##
## $BIC
## [1] 2.229878
plot(diff(df11)) # d=1
acf(diff(df11), lag.max=200) # MA: q = 0
pacf(diff(df11), lag.max=200) # AR: p = 4, 5
# try ARIMA(4,1,1)(0,1,0)[12] (comment previously said (4,0,1) — the code fits d=1).
# include.drift is NOT requested: with total differencing d + D = 2 the drift term
# is invalid and forecast::Arima ignores it with a warning, so the fit is identical.
fit1.1 <- Arima(df11, order=c(4, 1, 1), seasonal=c(0,1,0), method="ML")
summary(fit1.1) # inspect coefficients, sigma^2, AIC/AICc/BIC and training-set errors
## Series: df11
## ARIMA(4,1,1)(0,1,0)[12]
##
## Coefficients:
## ar1 ar2 ar3 ar4 ma1
## 0.0380 0.1928 0.2395 0.0621 -0.0319
## s.e. 0.5318 0.0466 0.1189 0.1400 0.5322
##
## sigma^2 estimated as 0.7274: log likelihood=-583.42
## AIC=1178.83 AICc=1179.02 BIC=1203.68
##
## Training set error measures:
## ME RMSE MAE MPE MAPE
## Training set -0.00228956 0.8366436 0.6124725 0.004780768 0.7093855
## MASE ACF1
## Training set 0.1979926 0.00137799
checkresiduals(fit1.1) # residual diagnostics + Ljung-Box test for remaining autocorrelation
##
## Ljung-Box test
##
## data: Residuals from ARIMA(4,1,1)(0,1,0)[12]
## Q* = 112.44, df = 19, p-value = 2.776e-15
##
## Model df: 5. Total lags used: 24
# AIC=1178.83 AICc=1179.02 BIC=1203.68
# Alternative hand-picked model: ARIMA(5,0,1)(0,1,0)[12] with drift
# (d + D = 1 here, so the drift term is valid).
fit1.2 <- Arima(df11, method = "ML", include.drift = TRUE,
                order = c(5, 0, 1), seasonal = c(0, 1, 0))
summary(fit1.2) # inspect coefficients, sigma^2, AIC/AICc/BIC and training-set errors
## Series: df11
## ARIMA(5,0,1)(0,1,0)[12] with drift
##
## Coefficients:
## ar1 ar2 ar3 ar4 ar5 ma1 drift
## 0.7600 0.3868 0.1127 -0.1658 -0.1681 0.1897 0.1215
## s.e. 1.5732 1.5818 0.2922 0.0942 0.3065 1.6364 0.0501
##
## sigma^2 estimated as 0.6837: log likelihood=-570.79
## AIC=1157.57 AICc=1157.89 BIC=1190.73
##
## Training set error measures:
## ME RMSE MAE MPE MAPE
## Training set 0.005559697 0.8102737 0.6024348 0.009176348 0.6974393
## MASE ACF1
## Training set 0.1947477 -0.01055751
checkresiduals(fit1.2) # residual diagnostics + Ljung-Box test for remaining autocorrelation
##
## Ljung-Box test
##
## data: Residuals from ARIMA(5,0,1)(0,1,0)[12] with drift
## Q* = 102.35, df = 17, p-value = 3.253e-14
##
## Model df: 7. Total lags used: 24
# AIC=1157.57 AICc=1157.89 BIC=1190.73
# comparing the by-hand models with ARIMA(4,0,1)(2,1,1)[12], the ARIMA(4,0,1)(2,1,1)[12] fits better
# 48-month-ahead forecast from the auto.arima model
pred1 <- forecast(fit1, h = 48)
pred1 # the last 2 columns are the 95% prediction interval lower and upper bounds
## Point Forecast Lo 80 Hi 80 Lo 95 Hi 95
## Nov 2019 114.9564 114.0478 115.8649 113.5669 116.3459
## Dec 2019 114.9664 113.7047 116.2282 113.0367 116.8961
## Jan 2020 114.8472 113.2367 116.4577 112.3841 117.3103
## Feb 2020 114.4272 112.4248 116.4297 111.3647 117.4897
## Mar 2020 114.8748 112.5066 117.2430 111.2530 118.4967
## Apr 2020 113.1381 110.4227 115.8535 108.9853 117.2909
## May 2020 113.5341 110.4884 116.5797 108.8762 118.1920
## Jun 2020 116.6900 113.3371 120.0428 111.5623 121.8177
## Jul 2020 114.8741 111.2374 118.5108 109.3122 120.4360
## Aug 2020 117.6623 113.7649 121.5597 111.7018 123.6229
## Sep 2020 116.0626 111.9277 120.1974 109.7388 122.3863
## Oct 2020 115.3294 110.9796 119.6792 108.6769 121.9818
## Nov 2020 115.0089 110.3571 119.6607 107.8945 122.1232
## Dec 2020 115.2870 110.3707 120.2033 107.7682 122.8058
## Jan 2021 115.3925 110.2268 120.5583 107.4922 123.2928
## Feb 2021 115.2023 109.7942 120.6105 106.9313 123.4734
## Mar 2021 115.8583 110.2268 121.4899 107.2456 124.4711
## Apr 2021 114.4128 108.5759 120.2497 105.4861 123.3395
## May 2021 114.8070 108.7824 120.8316 105.5931 124.0209
## Jun 2021 118.1339 111.9396 124.3281 108.6606 127.6072
## Jul 2021 116.4791 110.1327 122.8255 106.7731 126.1850
## Aug 2021 119.4140 112.9320 125.8959 109.5007 129.3273
## Sep 2021 117.9293 111.3274 124.5311 107.8325 128.0260
## Oct 2021 117.2416 110.5343 123.9489 106.9836 127.4996
## Nov 2021 117.0175 110.1625 123.8725 106.5337 127.5013
## Dec 2021 117.2923 110.3086 124.2761 106.6116 127.9731
## Jan 2022 117.4746 110.3696 124.5796 106.6084 128.3408
## Feb 2022 117.2795 110.0563 124.5027 106.2325 128.3264
## Mar 2022 117.9327 110.6005 125.2649 106.7190 129.1463
## Apr 2022 116.3650 108.9327 123.7974 104.9982 127.7318
## May 2022 116.7950 109.2710 124.3190 105.2881 128.3019
## Jun 2022 120.0865 112.4798 127.6931 108.4531 131.7198
## Jul 2022 118.3912 110.7106 126.0718 106.6448 130.1377
## Aug 2022 121.3911 113.6448 129.1375 109.5441 133.2381
## Sep 2022 119.8427 112.0384 127.6470 107.9071 131.7783
## Oct 2022 119.0153 111.1603 126.8703 107.0022 131.0284
## Nov 2022 118.7702 110.8218 126.7185 106.6142 130.9261
## Dec 2022 119.0077 110.9775 127.0379 106.7265 131.2888
## Jan 2023 119.1454 111.0346 127.2561 106.7410 131.5497
## Feb 2023 118.9160 110.7219 127.1101 106.3843 131.4477
## Mar 2023 119.5366 111.2631 127.8101 106.8834 132.1899
## Apr 2023 117.9422 109.5932 126.2912 105.1735 130.7109
## May 2023 118.3609 109.9406 126.7812 105.4832 131.2386
## Jun 2023 121.6281 113.1417 130.1144 108.6493 134.6068
## Jul 2023 119.9103 111.3634 128.4573 106.8389 132.9818
## Aug 2023 122.8740 114.2719 131.4761 109.7182 136.0297
## Sep 2023 121.3124 112.6607 129.9642 108.0808 134.5441
## Oct 2023 120.4925 111.7965 129.1884 107.1931 133.7918
autoplot(pred1) # plot the forecasts with prediction intervals
# fit a non-seasonal ARIMA model for comparison
fit2 <- auto.arima(df11, seasonal = FALSE, trace = TRUE) # ARIMA(2,1,2) with drift
##
## Fitting models using approximations to speed things up...
##
## ARIMA(2,1,2) with drift : 1670.596
## ARIMA(0,1,0) with drift : 1849.828
## ARIMA(1,1,0) with drift : 1745.937
## ARIMA(0,1,1) with drift : 1749.283
## ARIMA(0,1,0) : 1850.404
## ARIMA(1,1,2) with drift : 1734.904
## ARIMA(2,1,1) with drift : 1731.286
## ARIMA(3,1,2) with drift : 1727.289
## ARIMA(2,1,3) with drift : 1634.972
## ARIMA(1,1,3) with drift : 1733.839
## ARIMA(3,1,3) with drift : Inf
## ARIMA(2,1,4) with drift : Inf
## ARIMA(1,1,4) with drift : 1731.388
## ARIMA(3,1,4) with drift : 1637.621
## ARIMA(2,1,3) : 1641.337
##
## Now re-fitting the best model(s) without approximations...
##
## ARIMA(2,1,3) with drift : Inf
## ARIMA(3,1,4) with drift : Inf
## ARIMA(2,1,3) : Inf
## ARIMA(2,1,2) with drift : 1670.536
##
## Best model: ARIMA(2,1,2) with drift
# Explore first-difference, log, and diff-log transforms of the US series
df11.diff <- diff(df11)
plot(df11.diff, main=" diff data")
df11.log <- log(df11)
plot(df11.log, main=" log data")
di11.difflog <- diff(log(df11))
plot(di11.difflog, main=" difflog data")
acf(di11.difflog, lag.max = 100)  # from the acf plot, we can observe the seasonality
pacf(di11.difflog, lag.max = 100) # from the pacf plot, AR(1) or AR(2)
acf2(df11^2) # according to the pacf plot, ARCH(1) or ARCH(2) model
## ACF PACF
## [1,] 0.99 0.99
## [2,] 0.99 0.21
## [3,] 0.98 -0.03
## [4,] 0.97 -0.08
## [5,] 0.97 0.02
## [6,] 0.96 0.05
## [7,] 0.95 -0.07
## [8,] 0.95 -0.06
## [9,] 0.94 0.08
## [10,] 0.93 -0.02
## [11,] 0.93 -0.06
## [12,] 0.92 0.20
## [13,] 0.91 -0.35
## [14,] 0.90 0.10
## [15,] 0.90 0.01
## [16,] 0.89 -0.01
## [17,] 0.88 0.07
## [18,] 0.88 0.03
## [19,] 0.87 -0.03
## [20,] 0.86 0.04
## [21,] 0.86 0.02
## [22,] 0.85 0.02
## [23,] 0.84 -0.04
## [24,] 0.84 0.09
## [25,] 0.83 -0.22
## [26,] 0.83 0.09
## [27,] 0.82 0.01
## [28,] 0.81 -0.04
## [29,] 0.81 0.03
## [30,] 0.80 -0.01
## [31,] 0.79 0.02
## [32,] 0.79 0.01
## [33,] 0.78 0.01
## [34,] 0.78 0.01
## [35,] 0.77 -0.01
## [36,] 0.77 0.06
## [37,] 0.76 -0.17
## [38,] 0.76 0.02
## [39,] 0.75 0.02
## [40,] 0.74 0.00
## [41,] 0.74 0.03
## [42,] 0.73 0.02
## [43,] 0.73 -0.01
## [44,] 0.72 0.04
## [45,] 0.72 -0.02
## [46,] 0.71 0.00
## [47,] 0.71 0.01
## [48,] 0.70 0.01
# fit ARIMA(2,1,2), then examine ACF/PACF of the squared residuals for ARCH effects
arima212 <- arima(df11, order = c(2, 1, 2))
residual_arima212 <- arima212$residuals
acf2(residual_arima212^2)
## ACF PACF
## [1,] 0.03 0.03
## [2,] 0.12 0.12
## [3,] 0.12 0.11
## [4,] -0.07 -0.09
## [5,] 0.05 0.02
## [6,] -0.10 -0.10
## [7,] 0.08 0.10
## [8,] -0.09 -0.09
## [9,] 0.00 0.02
## [10,] 0.10 0.09
## [11,] 0.08 0.12
## [12,] 0.37 0.34
## [13,] 0.01 -0.02
## [14,] 0.06 -0.06
## [15,] 0.04 -0.04
## [16,] -0.08 -0.04
## [17,] 0.03 0.01
## [18,] -0.08 -0.02
## [19,] 0.02 0.00
## [20,] -0.07 -0.02
## [21,] 0.05 0.08
## [22,] 0.11 0.05
## [23,] 0.04 -0.01
## [24,] 0.29 0.14
## [25,] 0.04 0.04
## [26,] 0.07 0.03
## [27,] 0.10 0.08
## [28,] -0.10 -0.08
## [29,] 0.06 0.03
## [30,] -0.06 0.00
## [31,] 0.02 0.01
## [32,] -0.11 -0.11
## [33,] 0.06 0.05
## [34,] 0.03 -0.05
## [35,] 0.00 0.01
## [36,] 0.32 0.18
## [37,] 0.04 0.04
## [38,] 0.11 0.04
## [39,] 0.09 0.02
## [40,] -0.07 -0.04
## [41,] 0.02 -0.03
## [42,] -0.10 -0.04
## [43,] 0.02 0.01
## [44,] -0.10 -0.01
## [45,] -0.01 -0.01
## [46,] 0.09 0.06
## [47,] 0.01 0.00
## [48,] 0.28 0.07
# Residual diagnostics in a 2x2 grid; save and restore par() so the layout
# does not leak into every subsequent plot in the script
op <- par(mfrow = c(2, 2))
acf(residual_arima212, main="ACF of residuals (US)", lag.max=100)
pacf(residual_arima212, main="PACF of residuals (US)", lag.max=100)
acf(residual_arima212^2, main="ACF of squared residuals (US)", lag.max=100)
pacf(residual_arima212^2, main="PACF of squared residuals (US)", lag.max=100)
par(op)
# GARCH(3,3) on the ARIMA residuals (TRUE/FALSE spelled out, not T/F)
garch_us1 <- garch(residual_arima212, order = c(3, 3), trace = FALSE)
summary(garch_us1)
##
## Call:
## garch(x = residual_arima212, order = c(3, 3), trace = F)
##
## Model:
## GARCH(3,3)
##
## Residuals:
## Min 1Q Median 3Q Max
## -4.72215 -0.57499 0.05402 0.77642 2.72311
##
## Coefficient(s):
## Estimate Std. Error t value Pr(>|t|)
## a0 1.455e+00 2.754e+00 0.529 0.597
## a1 1.054e-02 6.284e-02 0.168 0.867
## a2 1.015e-01 8.114e-02 1.251 0.211
## a3 6.450e-02 2.579e-01 0.250 0.803
## b1 1.073e-02 2.530e+00 0.004 0.997
## b2 1.471e-15 1.614e+00 0.000 1.000
## b3 3.760e-02 8.583e-01 0.044 0.965
##
## Diagnostic Tests:
## Jarque Bera Test
##
## data: Residuals
## X-squared = 7.9274, df = 2, p-value = 0.01899
##
##
## Box-Ljung test
##
## data: Squared.Residuals
## X-squared = 0.055499, df = 1, p-value = 0.8138
AIC(garch_us1)
## [1] 1659.3
# pure ARCH(3): tseries::garch order is c(q, p), so c(0, 3) means no GARCH terms
garch_us2 <- garch(residual_arima212, order = c(0, 3), trace = FALSE)
summary(garch_us2)
##
## Call:
## garch(x = residual_arima212, order = c(0, 3), trace = F)
##
## Model:
## GARCH(0,3)
##
## Residuals:
## Min 1Q Median 3Q Max
## -4.66389 -0.57327 0.05375 0.77225 2.69503
##
## Coefficient(s):
## Estimate Std. Error t value Pr(>|t|)
## a0 1.564374 0.183095 8.544 <2e-16 ***
## a1 0.009081 0.060293 0.151 0.880
## a2 0.095794 0.076000 1.260 0.208
## a3 0.070207 0.051845 1.354 0.176
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Diagnostic Tests:
## Jarque Bera Test
##
## data: Residuals
## X-squared = 7.0134, df = 2, p-value = 0.03
##
##
## Box-Ljung test
##
## data: Squared.Residuals
## X-squared = 0.073098, df = 1, p-value = 0.7869
AIC(garch_us2) # the AIC is smaller, so the ARCH(3) model is better
## [1] 1652.923
# United Kingdom ----
# Keep the UK Industrial Production Index rows
df1_uk <- subset(ifs, Indicator_Code == 'AIP_IX' & Country_Name == 'United Kingdom')
head(df1_uk)
## Country_Name Country_Code
## 103049 United Kingdom 112
## 103050 United Kingdom 112
## 103051 United Kingdom 112
## 103052 United Kingdom 112
## 103053 United Kingdom 112
## 103054 United Kingdom 112
## Indicator_Name Indicator_Code
## 103049 Economic Activity, Industrial Production, Index AIP_IX
## 103050 Economic Activity, Industrial Production, Index AIP_IX
## 103051 Economic Activity, Industrial Production, Index AIP_IX
## 103052 Economic Activity, Industrial Production, Index AIP_IX
## 103053 Economic Activity, Industrial Production, Index AIP_IX
## 103054 Economic Activity, Industrial Production, Index AIP_IX
## Time_Period Value
## 103049 1997M8 96.15385
## 103050 1998M1 106.01578
## 103051 1998M7 108.48126
## 103052 1998M10 113.70809
## 103053 1998M12 108.48126
## 103054 1999M3 120.71006
dim(df1_uk)
## [1] 474 6
# Build a proper Date column: "1997M8" -> "1997/8/01" -> Date
df1_uk$Time_Period <- gsub("M", "/", df1_uk$Time_Period)
df1_uk['date'] <- '/01'
df1_uk$Time_Period <- paste0(df1_uk$Time_Period, df1_uk$date)
df1_uk$Time_Period <- as.Date(df1_uk$Time_Period, "%Y/%m/%d")
# sort chronologically, drop the helper column, reset row names
df1_uk <- df1_uk[order(df1_uk$Time_Period), ]
df1_uk <- subset(df1_uk, select = -c(date))
rownames(df1_uk) <- NULL
# Research Indicator: Economic Activity, Industrial Production, Index
# Indicator Code: AIP_IX
# Country: United Kingdom (the original comment said USA)
# NOTE(review): ts() assumes a monthly series starting 1980M1, but the head()
# output above shows the first UK observation is 1997M8 — verify the start date
df22 <- ts(df1_uk$Value, frequency = 12, start = c(1980, 1))
plot(df22, main="United Kingdom: Industrial Production Index")
plot(decompose(df22))
plot(decompose(df22))
# Raw periodogram (no log scale)
sp <- spectrum(df22, log='no')
# find the predominant frequency (was sp$fre — relied on $ partial matching)
sp$freq[which.max(sp$spec)] # 0.025
## [1] 0.025
# period of the predominant frequency (1/frequency)
1/sp$freq[which.max(sp$spec)] # 40 months
## [1] 40
max(sp$spec) # estimate of spectral density at the predominant frequency (~841.0988)
## [1] 841.0988
# 95% CI for the spectral density at the peak: 2*max(spec)/chi-square(2 df) quantiles
U <- qchisq(.025, 2)
L <- qchisq(.975, 2)
2*max(sp$spec)/L # lower bound for spectral density
## [1] 228.0093
2*max(sp$spec)/U # upper bound for spectral density
## [1] 33221.63
# The 95% confidence interval is [228.0093, 33221.63]
# Automatic seasonal ARIMA search for the UK series
fit22 <- auto.arima(df22, trace = TRUE) # Best model: ARIMA(5,1,1)(2,1,2)[12]
##
## Fitting models using approximations to speed things up...
##
## ARIMA(2,1,2)(1,1,1)[12] : 2059.437
## ARIMA(0,1,0)(0,1,0)[12] : 2571.477
## ARIMA(1,1,0)(1,1,0)[12] : 2401.882
## ARIMA(0,1,1)(0,1,1)[12] : 2245.311
## ARIMA(2,1,2)(0,1,1)[12] : 2082.606
## ARIMA(2,1,2)(1,1,0)[12] : 2157.584
## ARIMA(2,1,2)(2,1,1)[12] : 2054.25
## ARIMA(2,1,2)(2,1,0)[12] : 2125.736
## ARIMA(2,1,2)(2,1,2)[12] : 2046.093
## ARIMA(2,1,2)(1,1,2)[12] : 2052.854
## ARIMA(1,1,2)(2,1,2)[12] : 2072.008
## ARIMA(2,1,1)(2,1,2)[12] : 2044.492
## ARIMA(2,1,1)(1,1,2)[12] : 2050.995
## ARIMA(2,1,1)(2,1,1)[12] : 2052.372
## ARIMA(2,1,1)(1,1,1)[12] : 2057.492
## ARIMA(1,1,1)(2,1,2)[12] : 2082.583
## ARIMA(2,1,0)(2,1,2)[12] : 2044.521
## ARIMA(3,1,1)(2,1,2)[12] : 2042.901
## ARIMA(3,1,1)(1,1,2)[12] : 2054.019
## ARIMA(3,1,1)(2,1,1)[12] : 2056.749
## ARIMA(3,1,1)(1,1,1)[12] : 2059.586
## ARIMA(3,1,0)(2,1,2)[12] : 2043.263
## ARIMA(4,1,1)(2,1,2)[12] : 2047.265
## ARIMA(3,1,2)(2,1,2)[12] : 2039.889
## ARIMA(3,1,2)(1,1,2)[12] : 2055.05
## ARIMA(3,1,2)(2,1,1)[12] : 2054.321
## ARIMA(3,1,2)(1,1,1)[12] : 2060.681
## ARIMA(4,1,2)(2,1,2)[12] : 2034.072
## ARIMA(4,1,2)(1,1,2)[12] : Inf
## ARIMA(4,1,2)(2,1,1)[12] : 2055.666
## ARIMA(4,1,2)(1,1,1)[12] : 2061.153
## ARIMA(5,1,2)(2,1,2)[12] : 2034.261
## ARIMA(4,1,3)(2,1,2)[12] : 2036.166
## ARIMA(3,1,3)(2,1,2)[12] : Inf
## ARIMA(5,1,1)(2,1,2)[12] : 2032.826
## ARIMA(5,1,1)(1,1,2)[12] : 2050.585
## ARIMA(5,1,1)(2,1,1)[12] : 2056.553
## ARIMA(5,1,1)(1,1,1)[12] : 2055.263
## ARIMA(5,1,0)(2,1,2)[12] : 2046.034
## ARIMA(4,1,0)(2,1,2)[12] : 2045.187
##
## Now re-fitting the best model(s) without approximations...
##
## ARIMA(5,1,1)(2,1,2)[12] : 2065.401
##
## Best model: ARIMA(5,1,1)(2,1,2)[12]
# Refit the chosen model with sarima to get residual diagnostic plots and a t-table
sarima(df22, 5,1,1,2,1,2,12)
## initial value 1.434990
## iter 2 value 1.230325
## iter 3 value 0.933385
## iter 4 value 0.874606
## iter 5 value 0.851026
## iter 6 value 0.848742
## iter 7 value 0.845081
## iter 8 value 0.840059
## iter 9 value 0.837000
## iter 10 value 0.836880
## iter 11 value 0.836817
## iter 12 value 0.836764
## iter 13 value 0.836583
## iter 14 value 0.836095
## iter 15 value 0.833691
## iter 16 value 0.833226
## iter 17 value 0.832536
## iter 18 value 0.827505
## iter 19 value 0.822364
## iter 20 value 0.820549
## iter 21 value 0.818534
## iter 22 value 0.818008
## iter 23 value 0.817984
## iter 24 value 0.817974
## iter 25 value 0.817901
## iter 26 value 0.817868
## iter 27 value 0.817826
## iter 28 value 0.817781
## iter 29 value 0.817756
## iter 30 value 0.817708
## iter 31 value 0.817648
## iter 32 value 0.817429
## iter 33 value 0.815713
## iter 34 value 0.814529
## iter 35 value 0.812862
## iter 36 value 0.812569
## iter 37 value 0.811984
## iter 38 value 0.811157
## iter 39 value 0.809988
## iter 40 value 0.808249
## iter 41 value 0.807374
## iter 42 value 0.806551
## iter 43 value 0.804976
## iter 44 value 0.803021
## iter 45 value 0.801629
## iter 46 value 0.801293
## iter 47 value 0.801083
## iter 48 value 0.801062
## iter 49 value 0.801057
## iter 50 value 0.801057
## iter 50 value 0.801057
## iter 50 value 0.801057
## final value 0.801057
## converged
## initial value 0.800808
## iter 2 value 0.797917
## iter 3 value 0.797560
## iter 4 value 0.797310
## iter 5 value 0.796981
## iter 6 value 0.796822
## iter 7 value 0.796772
## iter 8 value 0.796749
## iter 9 value 0.796746
## iter 10 value 0.796746
## iter 11 value 0.796745
## iter 12 value 0.796745
## iter 13 value 0.796744
## iter 14 value 0.796742
## iter 15 value 0.796739
## iter 16 value 0.796733
## iter 17 value 0.796724
## iter 18 value 0.796703
## iter 19 value 0.796700
## iter 20 value 0.796697
## iter 21 value 0.796695
## iter 22 value 0.796694
## iter 23 value 0.796694
## iter 24 value 0.796694
## iter 24 value 0.796694
## iter 24 value 0.796694
## final value 0.796694
## converged
## $fit
##
## Call:
## stats::arima(x = xdata, order = c(p, d, q), seasonal = list(order = c(P, D,
## Q), period = S), include.mean = !no.constant, transform.pars = trans, fixed = fixed,
## optim.control = list(trace = trc, REPORT = 1, reltol = tol))
##
## Coefficients:
## ar1 ar2 ar3 ar4 ar5 ma1 sar1 sar2
## -0.0638 0.0922 0.3647 -0.0568 0.0504 -0.7192 0.7350 -0.5457
## s.e. 0.4768 0.3755 0.2256 0.0499 0.0501 0.4753 0.0664 0.0644
## sma1 sma2
## -1.2922 0.5803
## s.e. 0.0700 0.0604
##
## sigma^2 estimated as 4.752: log likelihood = -1021.41, aic = 2064.81
##
## $degrees_of_freedom
## [1] 451
##
## $ttable
## Estimate SE t.value p.value
## ar1 -0.0638 0.4768 -0.1338 0.8936
## ar2 0.0922 0.3755 0.2457 0.8060
## ar3 0.3647 0.2256 1.6165 0.1067
## ar4 -0.0568 0.0499 -1.1375 0.2560
## ar5 0.0504 0.0501 1.0067 0.3146
## ma1 -0.7192 0.4753 -1.5132 0.1309
## sar1 0.7350 0.0664 11.0656 0.0000
## sar2 -0.5457 0.0644 -8.4754 0.0000
## sma1 -1.2922 0.0700 -18.4594 0.0000
## sma2 0.5803 0.0604 9.6128 0.0000
##
## $AIC
## [1] 4.374604
##
## $AICc
## [1] 4.375615
##
## $BIC
## [1] 4.470933
# NOTE(review): diff(df22, 3) is a lag-3 difference, not d=1 as the original
# comment claimed — confirm whether diff(df22) was intended
plot(diff(df22, 3))
acf(diff(df22, 3), lag.max=200) # MA: q = 0
pacf(diff(df22, 3), lag.max=200) # AR: p = 1, 2
# try ARIMA(1,1,0)(0,3,0)[12]. include.drift is NOT requested: with total
# differencing d + D >= 2 the drift term is invalid and forecast::Arima
# ignores it with a warning, so the fit is identical without it.
fit2.1 <- Arima(df22, order=c(1, 1, 0), seasonal=c(0,3,0), method="ML")
summary(fit2.1) # inspect coefficients, sigma^2, AIC/AICc/BIC and training-set errors
## Series: df22
## ARIMA(1,1,0)(0,3,0)[12]
##
## Coefficients:
## ar1
## -0.5182
## s.e. 0.0409
##
## sigma^2 estimated as 69.66: log likelihood=-1553.69
## AIC=3111.38 AICc=3111.4 BIC=3119.54
##
## Training set error measures:
## ME RMSE MAE MPE MAPE MASE
## Training set 0.03230883 8.004988 5.838966 0.05872132 5.806634 2.023926
## ACF1
## Training set -0.2282839
checkresiduals(fit2.1) # residual diagnostics + Ljung-Box test for remaining autocorrelation
##
## Ljung-Box test
##
## data: Residuals from ARIMA(1,1,0)(0,3,0)[12]
## Q* = 394.41, df = 23, p-value < 2.2e-16
##
## Model df: 1. Total lags used: 24
# AIC=3111.38 AICc=3111.4 BIC=3119.54
# ARIMA(2,1,0)(0,3,0)[12]. As above, include.drift is omitted because with
# d + D >= 2 forecast::Arima ignores it with a warning; the fit is unchanged.
fit2.2 <- Arima(df22, order=c(2, 1, 0), seasonal=c(0,3,0), method="ML")
summary(fit2.2) # inspect coefficients, sigma^2, AIC/AICc/BIC and training-set errors
## Series: df22
## ARIMA(2,1,0)(0,3,0)[12]
##
## Coefficients:
## ar1 ar2
## -0.7483 -0.4426
## s.e. 0.0430 0.0430
##
## sigma^2 estimated as 56.21: log likelihood=-1506.52
## AIC=3019.05 AICc=3019.1 BIC=3031.29
##
## Training set error measures:
## ME RMSE MAE MPE MAPE MASE
## Training set 0.02702874 7.18248 5.234064 0.05320192 5.223328 1.814252
## ACF1
## Training set 0.02938732
# AIC=3019.05 AICc=3019.1 BIC=3031.29
checkresiduals(fit2.2) # residual diagnostics + Ljung-Box test for remaining autocorrelation
##
## Ljung-Box test
##
## data: Residuals from ARIMA(2,1,0)(0,3,0)[12]
## Q* = 269.44, df = 22, p-value < 2.2e-16
##
## Model df: 2. Total lags used: 24
# comparing the diagnostic plots, ARIMA(5,1,1)(2,1,2)[12] is better in residual acf
# 48-month-ahead forecast from the auto.arima model
pred22 <- forecast(fit22, h = 48)
pred22 # the last 2 columns are the 95% prediction interval lower and upper bounds
## Point Forecast Lo 80 Hi 80 Lo 95 Hi 95
## Jul 2019 97.15872 94.33405 99.98339 92.83876 101.47868
## Aug 2019 92.65677 89.76634 95.54720 88.23624 97.07730
## Sep 2019 97.44523 94.38186 100.50860 92.76021 102.13025
## Oct 2019 102.04632 98.48557 105.60707 96.60062 107.49202
## Nov 2019 101.66363 98.00648 105.32078 96.07050 107.25676
## Dec 2019 96.12563 92.21497 100.03629 90.14480 102.10646
## Jan 2020 96.89099 92.73152 101.05046 90.52963 103.25235
## Feb 2020 96.56224 92.26413 100.86035 89.98885 103.13564
## Mar 2020 106.70315 102.18151 111.22480 99.78789 113.61841
## Apr 2020 96.32467 91.62434 101.02500 89.13614 103.51321
## May 2020 97.71657 92.85921 102.57394 90.28788 105.14527
## Jun 2020 98.12829 93.08535 103.17123 90.41577 105.84080
## Jul 2020 96.24240 90.60779 101.87702 87.62501 104.85980
## Aug 2020 91.05382 85.20889 96.89875 82.11478 99.99287
## Sep 2020 99.35485 93.24877 105.46094 90.01640 108.69330
## Oct 2020 100.70513 94.25563 107.15463 90.84147 110.56878
## Nov 2020 101.64047 94.97898 108.30196 91.45260 111.82834
## Dec 2020 97.51976 90.58919 104.45032 86.92038 108.11913
## Jan 2021 96.47777 89.28843 103.66710 85.48263 107.47290
## Feb 2021 97.05218 89.64624 104.45811 85.72578 108.37858
## Mar 2021 107.13560 99.48614 114.78505 95.43676 118.83443
## Apr 2021 96.68426 88.81219 104.55632 84.64496 108.72355
## May 2021 97.77658 89.69261 105.86054 85.41322 110.13994
## Jun 2021 100.31640 92.01391 108.61888 87.61884 113.01395
## Jul 2021 95.93933 87.38900 104.48967 82.86272 109.01594
## Aug 2021 92.36534 83.60558 101.12509 78.96845 105.76223
## Sep 2021 101.05943 92.08641 110.03246 87.33638 114.78249
## Oct 2021 101.43646 92.24749 110.62543 87.38315 115.48977
## Nov 2021 103.85441 94.46598 113.24285 89.49605 118.21278
## Dec 2021 98.45137 88.86007 108.04267 83.78274 113.12000
## Jan 2022 98.19067 88.40147 107.97987 83.21938 113.16196
## Feb 2022 98.30338 88.32342 108.28333 83.04036 113.56640
## Mar 2022 108.46987 98.29905 118.64069 92.91495 124.02479
## Apr 2022 97.18001 86.82355 107.53647 81.34118 113.01884
## May 2022 99.06753 88.52899 109.60606 82.95023 115.18482
## Jun 2022 101.41191 90.69311 112.13071 85.01892 117.80490
## Jul 2022 96.39425 85.44720 107.34130 79.65218 113.13631
## Aug 2022 94.33364 83.20311 105.46418 77.31096 111.35633
## Sep 2022 101.42528 90.10700 112.74355 84.11547 118.73508
## Oct 2022 102.86239 91.34590 114.37888 85.24944 120.47534
## Nov 2022 105.63436 93.93951 117.32921 87.74863 123.52009
## Dec 2022 98.53044 86.65072 110.41015 80.36199 116.69888
## Jan 2023 99.82446 87.76232 111.88660 81.37700 118.27191
## Feb 2023 99.10189 86.86534 111.33844 80.38771 117.81607
## Mar 2023 109.36698 96.95338 121.78058 90.38202 128.35195
## Apr 2023 97.49608 84.90984 110.08231 78.24709 116.74506
## May 2023 100.13238 87.37672 112.88805 80.62427 119.64049
## Jun 2023 101.17348 88.24863 114.09834 81.40662 120.94034
autoplot(pred22)  # forecast plot with 80%/95% prediction bands
# Fit a non-seasonal ARIMA model to the UK series for comparison
fit3 <- auto.arima(df22, seasonal = FALSE, trace = TRUE)  # selects ARIMA(2,1,2)
##
## Fitting models using approximations to speed things up...
##
## ARIMA(2,1,2) with drift : 2911.535
## ARIMA(0,1,0) with drift : 3129.63
## ARIMA(1,1,0) with drift : 3081.236
## ARIMA(0,1,1) with drift : 2939.252
## ARIMA(0,1,0) : 3127.617
## ARIMA(1,1,2) with drift : 2934.21
## ARIMA(2,1,1) with drift : 2926.379
## ARIMA(3,1,2) with drift : Inf
## ARIMA(2,1,3) with drift : 2832.569
## ARIMA(1,1,3) with drift : 2934.936
## ARIMA(3,1,3) with drift : Inf
## ARIMA(2,1,4) with drift : Inf
## ARIMA(1,1,4) with drift : Inf
## ARIMA(3,1,4) with drift : Inf
## ARIMA(2,1,3) : 2831.025
## ARIMA(1,1,3) : 2933.27
## ARIMA(2,1,2) : 2909.949
## ARIMA(3,1,3) : Inf
## ARIMA(2,1,4) : Inf
## ARIMA(1,1,2) : 2932.669
## ARIMA(1,1,4) : Inf
## ARIMA(3,1,2) : Inf
## ARIMA(3,1,4) : Inf
##
## Now re-fitting the best model(s) without approximations...
##
## ARIMA(2,1,3) : Inf
## ARIMA(2,1,3) with drift : Inf
## ARIMA(2,1,2) : 2907.698
##
## Best model: ARIMA(2,1,2)
# First-difference, log, and log-difference transforms of the UK series
df22.diff <- diff(df22)
plot(df22.diff, main = " diff data")
df22.log <- log(df22)
plot(df22.log, main = " log data")
di22.difflog <- diff(log(df22))
plot(di22.difflog, main = " difflog data")
acf(di22.difflog, lag.max = 100)   # seasonality is visible in the ACF
pacf(di22.difflog, lag.max = 100)  # PACF suggests AR(3) or AR(4)
acf2(df22^2)                       # according to the PACF plot, ARCH(4)
## ACF PACF
## [1,] 0.78 0.78
## [2,] 0.71 0.26
## [3,] 0.72 0.30
## [4,] 0.76 0.32
## [5,] 0.73 0.12
## [6,] 0.75 0.24
## [7,] 0.71 -0.03
## [8,] 0.75 0.25
## [9,] 0.68 -0.17
## [10,] 0.65 -0.08
## [11,] 0.75 0.35
## [12,] 0.90 0.64
## [13,] 0.71 -0.45
## [14,] 0.65 -0.32
## [15,] 0.62 -0.41
## [16,] 0.68 0.05
## [17,] 0.66 0.02
## [18,] 0.65 0.08
## [19,] 0.62 0.09
## [20,] 0.68 0.09
## [21,] 0.58 -0.04
## [22,] 0.57 0.09
## [23,] 0.67 0.07
## [24,] 0.78 0.06
## [25,] 0.63 -0.01
## [26,] 0.57 -0.18
## [27,] 0.52 -0.13
## [28,] 0.60 0.00
## [29,] 0.57 -0.05
## [30,] 0.56 0.01
## [31,] 0.55 0.11
## [32,] 0.58 0.03
## [33,] 0.48 0.01
## [34,] 0.50 0.06
## [35,] 0.57 -0.16
## [36,] 0.68 0.09
## [37,] 0.56 0.08
## [38,] 0.47 -0.12
## [39,] 0.43 -0.10
## [40,] 0.53 0.05
## [41,] 0.47 0.00
## [42,] 0.47 -0.03
## [43,] 0.47 0.08
## [44,] 0.48 -0.02
## [45,] 0.40 -0.05
## [46,] 0.43 0.13
## [47,] 0.46 -0.14
## [48,] 0.60 0.08
# Fit ARIMA(2,1,2) to the UK series, then inspect the squared residuals:
# significant ACF/PACF in the squares would indicate remaining ARCH effects
arima212.2 <- arima(df22, order = c(2, 1, 2))
residual_arima212.2 <- arima212.2$residuals
acf2(residual_arima212.2^2)
## ACF PACF
## [1,] -0.04 -0.04
## [2,] -0.20 -0.20
## [3,] -0.14 -0.16
## [4,] 0.07 0.01
## [5,] 0.09 0.04
## [6,] -0.09 -0.10
## [7,] 0.15 0.19
## [8,] 0.09 0.10
## [9,] -0.13 -0.09
## [10,] -0.14 -0.08
## [11,] -0.11 -0.16
## [12,] 0.52 0.45
## [13,] 0.01 0.02
## [14,] -0.17 -0.04
## [15,] -0.10 0.00
## [16,] 0.04 -0.01
## [17,] 0.07 0.02
## [18,] -0.12 -0.04
## [19,] 0.20 0.13
## [20,] 0.06 -0.05
## [21,] -0.10 0.02
## [22,] -0.18 -0.10
## [23,] -0.09 -0.02
## [24,] 0.39 0.13
## [25,] 0.03 -0.02
## [26,] -0.19 -0.11
## [27,] -0.05 0.03
## [28,] -0.01 -0.06
## [29,] 0.12 0.09
## [30,] -0.11 0.04
## [31,] 0.20 0.04
## [32,] 0.05 -0.01
## [33,] -0.06 0.05
## [34,] -0.19 -0.07
## [35,] -0.04 0.06
## [36,] 0.29 0.03
## [37,] 0.00 -0.09
## [38,] -0.18 -0.05
## [39,] -0.07 -0.07
## [40,] 0.02 0.03
## [41,] 0.14 0.10
## [42,] -0.12 -0.02
## [43,] 0.15 -0.01
## [44,] 0.04 0.00
## [45,] -0.10 -0.07
## [46,] -0.19 -0.06
## [47,] -0.05 0.01
## [48,] 0.42 0.22
# Residual diagnostics for the UK ARIMA(2,1,2) fit
par(mfrow=c(2,2))
acf(residual_arima212.2, main="ACF of residuals (UK)", lag.max=200)
pacf(residual_arima212.2, main="PACF of residuals (UK)", lag.max=200)
acf(residual_arima212.2^2, main="ACF of squared residuals (UK)", lag.max=200)
pacf(residual_arima212.2^2, main="PACF of squared residuals (UK)", lag.max=200)
# BUG FIX: the GARCH model was fitted to `residual_arima212`, a stale object
# from an earlier section, instead of the UK residuals `residual_arima212.2`
# computed above (and used in the four plots above). Note that
# tseries::garch() takes order = c(p, q) with p = GARCH order and
# q = ARCH order, so c(0, 7) is a pure ARCH(7) model.
garch_uk1 <- garch(residual_arima212.2, order = c(0, 7), trace = FALSE)
summary(garch_uk1)
##
## Call:
## garch(x = residual_arima212, order = c(0, 7), trace = F)
##
## Model:
## GARCH(0,7)
##
## Residuals:
## Min 1Q Median 3Q Max
## -4.75116 -0.57509 0.05721 0.79602 2.90765
##
## Coefficient(s):
## Estimate Std. Error t value Pr(>|t|)
## a0 1.225e+00 2.166e-01 5.655 1.56e-08 ***
## a1 5.160e-02 6.214e-02 0.830 0.406
## a2 6.612e-02 6.110e-02 1.082 0.279
## a3 5.267e-02 5.364e-02 0.982 0.326
## a4 1.972e-02 6.305e-02 0.313 0.754
## a5 5.674e-02 6.206e-02 0.914 0.361
## a6 4.078e-15 5.792e-02 0.000 1.000
## a7 7.533e-02 5.892e-02 1.278 0.201
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Diagnostic Tests:
## Jarque Bera Test
##
## data: Residuals
## X-squared = 7.5789, df = 2, p-value = 0.02261
##
##
## Box-Ljung test
##
## data: Squared.Residuals
## X-squared = 0.0044309, df = 1, p-value = 0.9469
AIC(garch_uk1) # the AIC is smaller, so ARCH(0,7) model is better
## [1] 1641.352
# BUG FIX: as above, fit to the UK residuals `residual_arima212.2`, not the
# stale `residual_arima212` object. order = c(4, 7) means GARCH order p = 4,
# ARCH order q = 7 (tseries convention), matching the a1..a7 / b1..b4
# coefficients in the summary.
garch_uk2 <- garch(residual_arima212.2, order = c(4, 7), trace = FALSE)
summary(garch_uk2)
##
## Call:
## garch(x = residual_arima212, order = c(4, 7), trace = F)
##
## Model:
## GARCH(4,7)
##
## Residuals:
## Min 1Q Median 3Q Max
## -4.71673 -0.57249 0.05596 0.78096 2.81972
##
## Coefficient(s):
## Estimate Std. Error t value Pr(>|t|)
## a0 8.502e-01 9.173e-01 0.927 0.354
## a1 4.923e-02 6.722e-02 0.732 0.464
## a2 6.343e-02 7.307e-02 0.868 0.385
## a3 4.842e-02 8.977e-02 0.539 0.590
## a4 1.470e-02 8.829e-02 0.166 0.868
## a5 5.396e-02 7.459e-02 0.723 0.469
## a6 3.423e-15 8.823e-02 0.000 1.000
## a7 8.089e-02 8.127e-02 0.995 0.320
## b1 5.343e-02 8.972e-01 0.060 0.953
## b2 5.570e-02 8.293e-01 0.067 0.946
## b3 5.947e-02 5.870e-01 0.101 0.919
## b4 6.267e-02 5.587e-01 0.112 0.911
##
## Diagnostic Tests:
## Jarque Bera Test
##
## data: Residuals
## X-squared = 7.2065, df = 2, p-value = 0.02723
##
##
## Box-Ljung test
##
## data: Squared.Residuals
## X-squared = 0.019276, df = 1, p-value = 0.8896
AIC(garch_uk2)
## [1] 1649.505
# Canada ----
# BUG FIX: the bare symbol `Canada` was a section header left as code; no such
# object exists in this script, so evaluating it errors at run time (and no
# output follows it in the transcript). It is now a comment header.
df1_ca=subset(ifs, (Indicator_Code=='AIP_IX') & (Country_Name=='Canada'))
head(df1_ca)
## Country_Name Country_Code
## 178902 Canada 156
## 178903 Canada 156
## 178904 Canada 156
## 178905 Canada 156
## 178906 Canada 156
## 178907 Canada 156
## Indicator_Name Indicator_Code
## 178902 Economic Activity, Industrial Production, Index AIP_IX
## 178903 Economic Activity, Industrial Production, Index AIP_IX
## 178904 Economic Activity, Industrial Production, Index AIP_IX
## 178905 Economic Activity, Industrial Production, Index AIP_IX
## 178906 Economic Activity, Industrial Production, Index AIP_IX
## 178907 Economic Activity, Industrial Production, Index AIP_IX
## Time_Period Value
## 178902 1997M1 NA
## 178903 2007M5 108.47337
## 178904 2007M8 108.61256
## 178905 2008M4 105.83595
## 178906 2009M1 99.02647
## 178907 2009M4 92.17060
# Convert Time_Period strings like "2000M1" into Dates: "2000M1" -> "2000/1/01"
# -> 2000-01-01. The previous version built a temporary `date` column, pasted
# it on with paste(sep = ""), and then dropped the column again; a single
# paste0() expression produces the identical result without the helper column.
df1_ca$Time_Period <- as.Date(
  paste0(gsub("M", "/", df1_ca$Time_Period), "/01"),
  "%Y/%m/%d"
)
df1_ca <- df1_ca[order(df1_ca$Time_Period), ]  # chronological order
rownames(df1_ca) <- NULL
df1_ca <- na.omit(df1_ca)                      # drop rows with missing Value
head(df1_ca)
## Country_Name Country_Code
## 2 Canada 156
## 3 Canada 156
## 4 Canada 156
## 5 Canada 156
## 6 Canada 156
## 7 Canada 156
## Indicator_Name Indicator_Code
## 2 Economic Activity, Industrial Production, Index AIP_IX
## 3 Economic Activity, Industrial Production, Index AIP_IX
## 4 Economic Activity, Industrial Production, Index AIP_IX
## 5 Economic Activity, Industrial Production, Index AIP_IX
## 6 Economic Activity, Industrial Production, Index AIP_IX
## 7 Economic Activity, Industrial Production, Index AIP_IX
## Time_Period Value
## 2 2000-01-01 106.4326
## 3 2000-02-01 112.6132
## 4 2000-03-01 114.5484
## 5 2000-04-01 110.3226
## 6 2000-05-01 112.6478
## 7 2000-06-01 115.5850
# Research Indicator: Economic Activity, Industrial Production, Index
# Indicator Code: AIP_IX
# Country: Canada
# the original data
df33 <- ts(df1_ca$Value, frequency = 12, start = c(2000, 1), end = c(2018, 12))
plot(df33, main="Canada: Industrial Production Index")
plot(decompose(df33))
sp <- spectrum(df33, log='no')
# Find the predominant frequency. FIX: use the full component name `freq`;
# `sp$fre` only worked through partial matching of list names.
sp$freq[which.max(sp$spec)] # 0.05
## [1] 0.05
# find the period of the predominant frequency
1/sp$freq[which.max(sp$spec)] # 20 months
## [1] 20
max(sp$spec) # estimate of spectral density at the predominant frequency (115.1996)
## [1] 115.1996
# 95% chi-square interval for the spectral density (2 degrees of freedom);
# the .025 quantile yields the UPPER bound and the .975 quantile the LOWER one
U <- qchisq(.025, 2)
L <- qchisq(.975, 2)
2*max(sp$spec)/L # lower bound for spectral density
## [1] 31.22888
2*max(sp$spec)/U # upper bound for spectral density
## [1] 4550.14
# The 95% confidence interval is [31.22888, 4550.14]
fit33 <- auto.arima(df33, trace = TRUE) # Best model: ARIMA(2,1,2)(0,1,1)[12]
##
## Fitting models using approximations to speed things up...
##
## ARIMA(2,1,2)(1,1,1)[12] : 812.4419
## ARIMA(0,1,0)(0,1,0)[12] : 859.5941
## ARIMA(1,1,0)(1,1,0)[12] : 820.3644
## ARIMA(0,1,1)(0,1,1)[12] : 815.9909
## ARIMA(2,1,2)(0,1,1)[12] : 805.7274
## ARIMA(2,1,2)(0,1,0)[12] : 855.5499
## ARIMA(2,1,2)(0,1,2)[12] : 807.6211
## ARIMA(2,1,2)(1,1,0)[12] : 817.1942
## ARIMA(2,1,2)(1,1,2)[12] : 809.6784
## ARIMA(1,1,2)(0,1,1)[12] : 817.9261
## ARIMA(2,1,1)(0,1,1)[12] : 813.2687
## ARIMA(3,1,2)(0,1,1)[12] : 810.3778
## ARIMA(2,1,3)(0,1,1)[12] : 811.8729
## ARIMA(1,1,1)(0,1,1)[12] : 818.3023
## ARIMA(1,1,3)(0,1,1)[12] : 813.2287
## ARIMA(3,1,1)(0,1,1)[12] : 809.6983
## ARIMA(3,1,3)(0,1,1)[12] : 811.5295
##
## Now re-fitting the best model(s) without approximations...
##
## ARIMA(2,1,2)(0,1,1)[12] : 825.5495
##
## Best model: ARIMA(2,1,2)(0,1,1)[12]
sarima(df33, 2,1,2,0,1,1,12) # good fit
## initial value 0.611630
## iter 2 value 0.514917
## iter 3 value 0.502950
## iter 4 value 0.497509
## iter 5 value 0.494460
## iter 6 value 0.494060
## iter 7 value 0.493732
## iter 8 value 0.492755
## iter 9 value 0.492085
## iter 10 value 0.491685
## iter 11 value 0.491660
## iter 12 value 0.491657
## iter 13 value 0.491655
## iter 14 value 0.491633
## iter 15 value 0.491286
## iter 16 value 0.491085
## iter 17 value 0.490687
## iter 18 value 0.490617
## iter 19 value 0.490482
## iter 20 value 0.490400
## iter 21 value 0.490255
## iter 22 value 0.487365
## iter 23 value 0.485588
## iter 24 value 0.485003
## iter 25 value 0.484367
## iter 26 value 0.482405
## iter 27 value 0.481542
## iter 28 value 0.480885
## iter 29 value 0.475778
## iter 30 value 0.473318
## iter 31 value 0.472775
## iter 32 value 0.471360
## iter 33 value 0.470405
## iter 34 value 0.468233
## iter 35 value 0.467115
## iter 36 value 0.465970
## iter 37 value 0.465215
## iter 38 value 0.464134
## iter 39 value 0.463384
## iter 40 value 0.463122
## iter 41 value 0.463061
## iter 42 value 0.463048
## iter 43 value 0.463037
## iter 44 value 0.463032
## iter 45 value 0.463032
## iter 46 value 0.463032
## iter 46 value 0.463032
## iter 46 value 0.463032
## final value 0.463032
## converged
## initial value 0.475590
## iter 2 value 0.473109
## iter 3 value 0.472911
## iter 4 value 0.472845
## iter 5 value 0.472148
## iter 6 value 0.472138
## iter 7 value 0.472136
## iter 8 value 0.472135
## iter 9 value 0.472127
## iter 10 value 0.472115
## iter 11 value 0.472101
## iter 12 value 0.472098
## iter 13 value 0.472098
## iter 14 value 0.472098
## iter 15 value 0.472098
## iter 16 value 0.472098
## iter 17 value 0.472098
## iter 17 value 0.472098
## iter 17 value 0.472098
## final value 0.472098
## converged
## $fit
##
## Call:
## stats::arima(x = xdata, order = c(p, d, q), seasonal = list(order = c(P, D,
## Q), period = S), include.mean = !no.constant, transform.pars = trans, fixed = fixed,
## optim.control = list(trace = trc, REPORT = 1, reltol = tol))
##
## Coefficients:
## ar1 ar2 ma1 ma2 sma1
## -1.1088 -0.8565 0.9736 0.7177 -0.4960
## s.e. 0.1201 0.1754 0.1805 0.2449 0.0568
##
## sigma^2 estimated as 2.526: log likelihood = -406.57, aic = 825.15
##
## $degrees_of_freedom
## [1] 210
##
## $ttable
## Estimate SE t.value p.value
## ar1 -1.1088 0.1201 -9.2346 0.0000
## ar2 -0.8565 0.1754 -4.8837 0.0000
## ma1 0.9736 0.1805 5.3937 0.0000
## ma2 0.7177 0.2449 2.9306 0.0038
## sma1 -0.4960 0.0568 -8.7308 0.0000
##
## $AIC
## [1] 3.651087
##
## $AICc
## [1] 3.652294
##
## $BIC
## [1] 3.740573
# NOTE(review): diff(df33, 4) is a lag-4 difference (x_t - x_{t-4}), not a
# fourth-order difference -- confirm this is what was intended.
plot(diff(df33,4)) # d=1
acf(diff(df33,4), lag.max=200) # MA:q= 0
pacf(diff(df33,4), lag.max=200) # AR:p= 1, 2
# try ARIMA(1,1,0)
# NOTE(review): seasonal = c(0, 4, 0) requests FOUR seasonal differences
# (D = 4); that is what triggers the warning below and suppresses the
# requested drift term. D = 1 was probably intended -- verify.
fit3.1 <- Arima(df33, order=c(1, 1, 0), seasonal=c(0,4,0), method="ML",include.drift = TRUE)
## Warning in Arima(df33, order = c(1, 1, 0), seasonal = c(0, 4, 0), method =
## "ML", : No drift term fitted as the order of difference is 2 or more.
summary(fit3.1)
## Series: df33
## ARIMA(1,1,0)(0,4,0)[12]
##
## Coefficients:
## ar1
## -0.0911
## s.e. 0.0743
##
## sigma^2 estimated as 95.66: log likelihood=-697.4
## AIC=1398.8 AICc=1398.86 BIC=1405.17
##
## Training set error measures:
## ME RMSE MAE MPE MAPE MASE
## Training set 0.05204617 8.642069 5.868094 0.06058388 5.41006 1.546495
## ACF1
## Training set 0.01003916
checkresiduals(fit3.1)
##
## Ljung-Box test
##
## data: Residuals from ARIMA(1,1,0)(0,4,0)[12]
## Q* = 164.83, df = 23, p-value < 2.2e-16
##
## Model df: 1. Total lags used: 24
# fit3.1: AIC=1398.8 AICc=1398.86 BIC=1405.17 (per the summary above; the
# previously quoted AIC=3111.38 did not match this output and appears to have
# been copied from a different model)
fit3.2 <- Arima(df33, order=c(2, 1, 0),seasonal=c(0,4,0), method="ML",include.drift = TRUE)
## Warning in Arima(df33, order = c(2, 1, 0), seasonal = c(0, 4, 0), method =
## "ML", : No drift term fitted as the order of difference is 2 or more.
summary(fit3.2)
## Series: df33
## ARIMA(2,1,0)(0,4,0)[12]
##
## Coefficients:
## ar1 ar2
## -0.0814 0.1074
## s.e. 0.0741 0.0740
##
## sigma^2 estimated as 95.06: log likelihood=-696.35
## AIC=1398.71 AICc=1398.84 BIC=1408.27
##
## Training set error measures:
## ME RMSE MAE MPE MAPE MASE
## Training set 0.04387278 8.590645 5.803412 0.05248819 5.349517 1.529449
## ACF1
## Training set -0.01498271
# fit3.2: AIC=1398.71 AICc=1398.84 BIC=1408.27 (per the summary above; the
# previously quoted AIC=3019.05 did not match this output)
checkresiduals(fit3.2)
##
## Ljung-Box test
##
## data: Residuals from ARIMA(2,1,0)(0,4,0)[12]
## Q* = 155.59, df = 22, p-value < 2.2e-16
##
## Model df: 2. Total lags used: 24
# Forecast 48 months ahead from the seasonal model fit33
pred33 <- forecast(fit33, h = 48)
pred33 # the last two columns are the 95% prediction interval bounds (Lo 95 / Hi 95)
## Point Forecast Lo 80 Hi 80 Lo 95 Hi 95
## Jan 2019 122.6092 120.5480 124.6704 119.45692 125.7615
## Feb 2019 126.6568 123.9318 129.3817 122.48926 130.8243
## Mar 2019 125.0077 121.7389 128.2765 120.00851 130.0068
## Apr 2019 119.5537 115.7118 123.3955 113.67801 125.4293
## May 2019 118.8311 114.6042 123.0579 112.36661 125.2955
## Jun 2019 121.7887 117.1692 126.4083 114.72371 128.8538
## Jul 2019 119.1687 114.1463 124.1912 111.48755 126.8499
## Aug 2019 122.8566 117.5329 128.1803 114.71470 130.9984
## Sep 2019 123.6803 118.0272 129.3334 115.03459 132.3259
## Oct 2019 124.2781 118.3057 130.2505 115.14408 133.4120
## Nov 2019 128.5006 122.2679 134.7332 118.96859 138.0326
## Dec 2019 122.2585 115.7366 128.7804 112.28409 132.2329
## Jan 2020 125.3760 118.2263 132.5257 114.44144 136.3105
## Feb 2020 129.2488 121.6050 136.8927 117.55853 140.9391
## Mar 2020 127.9106 119.7576 136.0635 115.44173 140.3794
## Apr 2020 122.2615 113.6139 130.9092 109.03613 135.4870
## May 2020 121.4890 112.4244 130.5535 107.62585 135.3520
## Jun 2020 124.6691 115.1639 134.1743 110.13210 139.2061
## Jul 2020 121.8452 111.9237 131.7667 106.67164 137.0188
## Aug 2020 125.5686 115.2757 135.8614 109.82699 141.3101
## Sep 2020 126.5275 115.8422 137.2129 110.18572 142.8693
## Oct 2020 126.9449 115.8951 137.9948 110.04570 143.8442
## Nov 2020 131.2516 119.8612 142.6420 113.83151 148.6717
## Dec 2020 125.0707 113.3251 136.8164 107.10730 143.0342
## Jan 2021 128.0483 115.6924 140.4042 109.15152 146.9450
## Feb 2021 132.0238 119.1301 144.9176 112.30449 151.7432
## Mar 2021 130.6916 117.2585 144.1246 110.14750 151.2356
## Apr 2021 124.9479 110.9865 138.9094 103.59574 146.3001
## May 2021 124.2751 109.8335 138.7167 102.18855 146.3617
## Jun 2021 127.4256 112.4962 142.3551 104.59300 150.2583
## Jul 2021 124.5491 109.1489 139.9494 100.99654 148.1018
## Aug 2021 128.3562 112.5161 144.1963 104.13085 152.5815
## Sep 2021 129.2674 112.9797 145.5551 104.35754 154.1773
## Oct 2021 129.6661 112.9502 146.3819 104.10139 155.2308
## Nov 2021 134.0344 116.9090 151.1598 107.84340 160.2255
## Dec 2021 127.8012 110.2612 145.3413 100.97604 154.6264
## Jan 2022 130.7840 112.6029 148.9651 102.97838 158.5895
## Feb 2022 134.7986 116.0358 153.5614 106.10334 163.4939
## Mar 2022 133.4185 114.0770 152.7600 103.83824 162.9988
## Apr 2022 127.6944 107.7764 147.6124 97.23247 158.1563
## May 2022 127.0409 106.5901 147.4917 95.76409 158.3177
## Jun 2022 130.1533 109.1654 151.1412 98.05512 162.2515
## Jul 2022 127.3025 105.7879 148.8172 94.39878 160.2063
## Aug 2022 131.1137 109.1028 153.1246 97.45093 164.7765
## Sep 2022 131.9983 109.4852 154.5115 97.56747 166.4292
## Oct 2022 132.4230 109.4223 155.4236 97.24645 167.5995
## Nov 2022 136.7853 113.3170 160.2536 100.89370 172.6769
## Dec 2022 130.5365 106.5960 154.4770 93.92272 167.1503
autoplot(pred33)  # forecast plot with 80%/95% prediction bands
# Fit a non-seasonal ARIMA model to the Canadian series for comparison
fit4 <- auto.arima(df33, seasonal = FALSE, trace = TRUE)  # selects ARIMA(1,1,2)
##
## Fitting models using approximations to speed things up...
##
## ARIMA(2,1,2) with drift : 1265.845
## ARIMA(0,1,0) with drift : 1338.367
## ARIMA(1,1,0) with drift : 1318.347
## ARIMA(0,1,1) with drift : 1287.103
## ARIMA(0,1,0) : 1336.366
## ARIMA(1,1,2) with drift : 1262.375
## ARIMA(0,1,2) with drift : 1279.524
## ARIMA(1,1,1) with drift : 1280.805
## ARIMA(1,1,3) with drift : 1264.084
## ARIMA(0,1,3) with drift : 1271.954
## ARIMA(2,1,1) with drift : 1279.615
## ARIMA(2,1,3) with drift : 1267.933
## ARIMA(1,1,2) : 1260.591
## ARIMA(0,1,2) : 1277.917
## ARIMA(1,1,1) : 1278.922
## ARIMA(2,1,2) : 1263.951
## ARIMA(1,1,3) : 1262.254
## ARIMA(0,1,1) : 1285.394
## ARIMA(0,1,3) : 1270.212
## ARIMA(2,1,1) : 1277.722
## ARIMA(2,1,3) : 1266.006
##
## Now re-fitting the best model(s) without approximations...
##
## ARIMA(1,1,2) : 1265.904
##
## Best model: ARIMA(1,1,2)
# First-difference, log, and log-difference transforms of the Canadian series
df33.diff <- diff(df33)
plot(df33.diff, main = " diff data")
df33.log <- log(df33)
plot(df33.log, main = " log data")
di33.difflog <- diff(log(df33))
plot(di33.difflog, main = " difflog data")
acf(di33.difflog, lag.max = 100)   # seasonality is visible in the ACF
pacf(di33.difflog, lag.max = 100)  # PACF suggests AR(2) or AR(4)
acf2(df33^2)                       # according to the PACF plot, ARCH(2)
## ACF PACF
## [1,] 0.76 0.76
## [2,] 0.65 0.18
## [3,] 0.66 0.30
## [4,] 0.62 0.04
## [5,] 0.63 0.22
## [6,] 0.58 -0.07
## [7,] 0.57 0.15
## [8,] 0.52 -0.14
## [9,] 0.51 0.14
## [10,] 0.45 -0.26
## [11,] 0.49 0.41
## [12,] 0.66 0.32
## [13,] 0.44 -0.65
## [14,] 0.33 -0.07
## [15,] 0.34 -0.01
## [16,] 0.30 0.11
## [17,] 0.32 -0.08
## [18,] 0.27 0.04
## [19,] 0.27 0.07
## [20,] 0.23 -0.03
## [21,] 0.23 0.12
## [22,] 0.19 0.12
## [23,] 0.24 0.02
## [24,] 0.39 -0.07
## [25,] 0.19 -0.13
## [26,] 0.10 -0.08
## [27,] 0.11 -0.03
## [28,] 0.10 0.03
## [29,] 0.12 -0.07
## [30,] 0.08 0.13
## [31,] 0.10 0.00
## [32,] 0.08 0.10
## [33,] 0.09 -0.01
## [34,] 0.06 -0.02
## [35,] 0.10 -0.01
## [36,] 0.25 0.07
## [37,] 0.08 -0.15
## [38,] -0.01 -0.03
## [39,] 0.01 -0.03
## [40,] -0.01 -0.09
## [41,] 0.01 -0.06
## [42,] -0.03 0.00
## [43,] -0.03 0.02
## [44,] -0.05 -0.05
## [45,] -0.03 0.06
## [46,] -0.08 0.00
## [47,] -0.05 -0.04
## [48,] 0.08 -0.06
# Fit ARIMA(1,1,2) to the Canadian series, then inspect the squared residuals:
# significant ACF/PACF in the squares would indicate remaining ARCH effects
arima112 <- arima(df33, order = c(1, 1, 2))
residual_arima112 <- arima112$residuals
acf2(residual_arima112^2)
## ACF PACF
## [1,] -0.05 -0.05
## [2,] -0.06 -0.06
## [3,] -0.03 -0.04
## [4,] -0.09 -0.10
## [5,] 0.28 0.27
## [6,] -0.17 -0.17
## [7,] 0.24 0.29
## [8,] -0.08 -0.14
## [9,] -0.03 0.11
## [10,] -0.07 -0.27
## [11,] -0.06 0.20
## [12,] 0.68 0.55
## [13,] -0.06 0.02
## [14,] -0.07 -0.11
## [15,] -0.05 0.03
## [16,] -0.08 -0.05
## [17,] 0.24 0.01
## [18,] -0.14 -0.02
## [19,] 0.20 0.08
## [20,] -0.09 -0.09
## [21,] -0.02 0.07
## [22,] -0.02 0.03
## [23,] -0.08 0.01
## [24,] 0.50 -0.03
## [25,] -0.09 0.00
## [26,] -0.07 -0.07
## [27,] -0.02 0.10
## [28,] -0.04 0.01
## [29,] 0.17 -0.04
## [30,] -0.12 -0.04
## [31,] 0.17 0.06
## [32,] -0.05 0.02
## [33,] -0.01 0.03
## [34,] -0.02 -0.06
## [35,] -0.07 0.04
## [36,] 0.47 0.19
## [37,] -0.06 0.07
## [38,] -0.07 -0.04
## [39,] -0.03 -0.06
## [40,] -0.04 -0.04
## [41,] 0.13 -0.04
## [42,] -0.10 0.04
## [43,] 0.16 0.05
## [44,] -0.02 0.02
## [45,] 0.01 0.02
## [46,] -0.02 0.06
## [47,] -0.07 -0.01
## [48,] 0.38 -0.03
# Residual diagnostics for the Canadian ARIMA(1,1,2) fit
par(mfrow=c(2,2))
acf(residual_arima112, main="ACF of residuals (Canada)", lag.max=100)
pacf(residual_arima112, main="PACF of residuals (Canada)", lag.max=100)
acf(residual_arima112^2, main="ACF of squared residuals (Canada)", lag.max=100)
pacf(residual_arima112^2, main="PACF of squared residuals (Canada)", lag.max=100)
# BUG FIX: the GARCH model was fitted to `residual_arima212` (the UK section's
# residuals) instead of the Canadian residuals `residual_arima112` computed
# above. With the fix, garch_ca1 and garch_ca2 are fitted to the same series,
# so their AICs are comparable. order = c(6, 1): GARCH p = 6, ARCH q = 1.
garch_ca1 <- garch(residual_arima112, order = c(6, 1), trace = FALSE)
summary(garch_ca1)
##
## Call:
## garch(x = residual_arima212, order = c(6, 1), trace = F)
##
## Model:
## GARCH(6,1)
##
## Residuals:
## Min 1Q Median 3Q Max
## -4.72773 -0.55433 0.05367 0.78023 2.64293
##
## Coefficient(s):
## Estimate Std. Error t value Pr(>|t|)
## a0 1.204e+00 2.043e+00 0.590 0.555
## a1 6.910e-02 7.946e-02 0.870 0.385
## b1 7.853e-02 4.716e-01 0.167 0.868
## b2 6.655e-02 4.747e-01 0.140 0.889
## b3 1.636e-14 8.518e-01 0.000 1.000
## b4 6.499e-02 6.449e-01 0.101 0.920
## b5 6.557e-03 7.233e-01 0.009 0.993
## b6 8.502e-02 7.043e-01 0.121 0.904
##
## Diagnostic Tests:
## Jarque Bera Test
##
## data: Residuals
## X-squared = 9.3336, df = 2, p-value = 0.009402
##
##
## Box-Ljung test
##
## data: Squared.Residuals
## X-squared = 0.47876, df = 1, p-value = 0.489
AIC(garch_ca1)
## [1] 1656.202
# GARCH(6,2) on the Canadian ARIMA(1,1,2) residuals. tseries::garch() takes
# order = c(p, q) with p = GARCH order and q = ARCH order, matching the
# a1..a2 / b1..b6 coefficients in the summary below.
garch_ca2 <- garch(residual_arima112, order = c(6, 2), trace = FALSE)
summary(garch_ca2)
##
## Call:
## garch(x = residual_arima112, order = c(6, 2), trace = F)
##
## Model:
## GARCH(6,2)
##
## Residuals:
## Min 1Q Median 3Q Max
## -3.5582 -0.6079 0.2180 0.7475 1.7741
##
## Coefficient(s):
## Estimate Std. Error t value Pr(>|t|)
## a0 8.902e+00 1.150e+01 0.774 0.439
## a1 8.104e-15 9.045e-02 0.000 1.000
## a2 4.910e-03 9.147e-02 0.054 0.957
## b1 5.858e-02 2.163e+01 0.003 0.998
## b2 5.763e-02 1.612e+01 0.004 0.997
## b3 6.608e-02 6.839e+00 0.010 0.992
## b4 6.506e-02 1.920e+01 0.003 0.997
## b5 6.710e-02 9.518e+00 0.007 0.994
## b6 6.719e-02 1.964e+01 0.003 0.997
##
## Diagnostic Tests:
## Jarque Bera Test
##
## data: Residuals
## X-squared = 26.185, df = 2, p-value = 2.061e-06
##
##
## Box-Ljung test
##
## data: Squared.Residuals
## X-squared = 0.54673, df = 1, p-value = 0.4597
AIC(garch_ca2) # the AIC is smaller, so GARCH(6,2) model is better
## [1] 1248.765